How to train a Keras model and serve it with Online Prediction

Example notebook showing how to use cloudmlmagic. Before you begin, install cloudmlmagic:

$ pip install cloudmlmagic

In [ ]:
# Load the cloudmlmagic extension
%load_ext cloudmlmagic

In [ ]:
# Initialize the Cloud ML Engine client library.
# Make sure you call this magic before adding code or running it.
# Do NOT forget to set runtimeVersion, since you are using Keras.

%ml_init -projectId PROJECTID -bucket BUCKET -scaleTier BASIC -runtimeVersion 1.2

In [ ]:
%%ml_code

import tensorflow as tf
from tensorflow.contrib.keras.python import keras
from tensorflow.contrib.keras.python.keras.models import Sequential
from tensorflow.contrib.keras.python.keras.layers.core import Dense, Activation
from sklearn.model_selection import train_test_split

iris = tf.contrib.learn.datasets.base.load_iris()
train_x, test_x, train_y, test_y = train_test_split(
    iris.data, iris.target, test_size=0.2)

num_classes = 3
train_y = keras.utils.to_categorical(train_y, num_classes)
test_y = keras.utils.to_categorical(test_y, num_classes)

model = Sequential()
model.add(Dense(10, activation='relu', input_shape=(4,)))
model.add(Dense(20, activation='relu'))
model.add(Dense(10, activation='relu'))
model.add(Dense(3, activation='softmax'))

model.compile(loss='categorical_crossentropy',
              optimizer='sgd',
              metrics=['accuracy'])

cb = keras.callbacks.TensorBoard(
    log_dir="gs://BUCKET/keras-mlengine", histogram_freq=1)

In [ ]:
%%ml_run cloud

# train
model.fit(train_x, train_y,
          batch_size=100,
          epochs=20,
          verbose=2,
          callbacks=[cb],
          validation_data=(test_x, test_y))

# eval
score = model.evaluate(test_x, test_y, verbose=0)
pred = model.predict(test_x)

# Add a serving signature to the model so that ML Engine can feed features
from tensorflow.contrib.keras import backend
sess = backend.get_session()
x = sess.graph.get_tensor_by_name('dense_1_input:0')
y = sess.graph.get_tensor_by_name('ArgMax_1:0')
inputs = {"dense_1_input": tf.saved_model.utils.build_tensor_info(x)}
outputs = {"ArgMax_1": tf.saved_model.utils.build_tensor_info(y)}
signature = tf.saved_model.signature_def_utils.build_signature_def(
    inputs=inputs,
    outputs=outputs,
    method_name=tf.saved_model.signature_constants.PREDICT_METHOD_NAME
)

# save as SavedModel
b = tf.saved_model.builder.SavedModelBuilder('gs://BUCKET/keras-mlengine/savedmodel')
b.add_meta_graph_and_variables(sess,
                               [tf.saved_model.tag_constants.SERVING],
                               signature_def_map={'serving_default': signature})
b.save()
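
Optional sanity check: you can inspect the exported SavedModel before registering it. A minimal sketch, assuming the Cloud SDK (gsutil) and TensorFlow's saved_model_cli are installed locally and BUCKET is replaced with your bucket name:

$ gsutil cp -r gs://BUCKET/keras-mlengine/savedmodel ./savedmodel
$ saved_model_cli show --dir ./savedmodel --all

This prints the serving_default signature, so you can confirm the input and output tensor names (dense_1_input, ArgMax_1) that Online Prediction will expect.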

Online Prediction

Regsiter "SavedModel" you have created above to Cloud ML Engine.


In [ ]:
from oauth2client.client import GoogleCredentials
from googleapiclient import discovery
from googleapiclient import errors

PROJECTID = 'PROJECTID'
projectID = 'projects/{}'.format(PROJECTID)
modelName = 'keras-iris'
modelID = '{}/models/{}'.format(projectID, modelName)

credentials = GoogleCredentials.get_application_default()
ml = discovery.build('ml', 'v1', credentials=credentials)

In [ ]:
request_body = {'instances': [{'dense_1_input': [5.4,  3.9,  1.3,  0.4]},
                              {'dense_1_input': [4.4,  3.2,  1.3,  0.2]},
                              {'dense_1_input': [4.3,  3.,  1.1,  0.1]},
                              {'dense_1_input': [5.,  3.5,  1.6,  0.6]},
                              {'dense_1_input': [5.9,  3.,  4.2,  1.5]},
                              {'dense_1_input': [7.7,  3.,  6.1,  2.3]},
                              ]}

request = ml.projects().predict(name=modelID, body=request_body)
try:
    response = request.execute()
except errors.HttpError as err:
    # Something went wrong with the HTTP transaction.
    # To use logging, you need to 'import logging'.
    print('There was an HTTP error during the request:')
    print(err._get_reason())
response
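
If the request succeeds, response is a dict with a 'predictions' list containing one entry per instance. With the signature exported above, each entry should contain the ArgMax_1 output key; the exact key depends on the tensor names in your graph, so treat this as a sketch:

In [ ]:
# Print the predicted class index for each instance
# (assumes the 'ArgMax_1' output key from the exported signature)
for i, p in enumerate(response['predictions']):
    print('instance {}: predicted class {}'.format(i, p['ArgMax_1']))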